From d7981a98b85da107469c9ea67375243471ef421a Mon Sep 17 00:00:00 2001 From: Keir Fraser Date: Tue, 15 Sep 2009 09:54:16 +0100 Subject: [PATCH] x86: Free unused pages of per-cpu data. As well as freeing data pages for impossible cpus, we also free pages of all other cpus which contain no actual data (because of too-large statically-defined PERCPU_SHIFT). Signed-off-by: Keir Fraser --- xen/arch/x86/setup.c | 41 ++++++++++++++++++++--------------------- 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c index 6518152985..23f3a40728 100644 --- a/xen/arch/x86/setup.c +++ b/xen/arch/x86/setup.c @@ -191,6 +191,18 @@ void __init discard_initial_images(void) init_domheap_pages(initial_images_base, initial_images_end); } +static void free_xen_data(char *s, char *e) +{ +#ifndef MEMORY_GUARD + init_xenheap_pages(__pa(s), __pa(e)); +#endif + memguard_guard_range(s, e-s); +#if defined(CONFIG_X86_64) + /* Also zap the mapping in the 1:1 area. */ + memguard_guard_range(__va(__pa(s)), e-s); +#endif +} + extern char __init_begin[], __bss_start[]; extern char __per_cpu_start[], __per_cpu_data_end[]; @@ -212,19 +224,13 @@ static void __init percpu_init_areas(void) for ( ; i < NR_CPUS; i++ ) BUG_ON(cpu_possible(i)); -#ifndef MEMORY_GUARD - init_xenheap_pages(__pa(__per_cpu_start) + (first_unused << PERCPU_SHIFT), - __pa(__bss_start)); -#endif - memguard_guard_range(&__per_cpu_start[first_unused << PERCPU_SHIFT], - __bss_start - &__per_cpu_start[first_unused << - PERCPU_SHIFT]); -#if defined(CONFIG_X86_64) - /* Also zap the mapping in the 1:1 area. 
*/ - memguard_guard_range(__va(__pa(__per_cpu_start)) + - (first_unused << PERCPU_SHIFT), - (NR_CPUS - first_unused) << PERCPU_SHIFT); -#endif + free_xen_data(&__per_cpu_start[first_unused << PERCPU_SHIFT], __bss_start); + + data_size = (data_size + PAGE_SIZE - 1) & PAGE_MASK; + if ( data_size != PERCPU_SIZE ) + for ( i = 0; i < first_unused; i++ ) + free_xen_data(&__per_cpu_start[(i << PERCPU_SHIFT) + data_size], + &__per_cpu_start[(i+1) << PERCPU_SHIFT]); } static void __init init_idle_domain(void) @@ -391,14 +397,7 @@ void init_done(void) /* Free (or page-protect) the init areas. */ memset(__init_begin, 0xcc, __init_end - __init_begin); /* int3 poison */ -#ifndef MEMORY_GUARD - init_xenheap_pages(__pa(__init_begin), __pa(__init_end)); -#endif - memguard_guard_range(__init_begin, __init_end - __init_begin); -#if defined(CONFIG_X86_64) - /* Also zap the mapping in the 1:1 area. */ - memguard_guard_range(__va(__pa(__init_begin)), __init_end - __init_begin); -#endif + free_xen_data(__init_begin, __init_end); printk("Freed %ldkB init memory.\n", (long)(__init_end-__init_begin)>>10); startup_cpu_idle_loop(); -- 2.30.2